home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
CU Amiga Super CD-ROM 11
/
CU Amiga Magazine's Super CD-ROM 11 (1997)(EMAP Images)(GB)(Track 1 of 3)[!][issue 1997-06].iso
/
www
/
http
/
www.amigasupport.com
/
software
/
arc
/
aiff_dtc.lha
/
source
/
Class.c
< prev
next >
Wrap
C/C++ Source or Header
|
1995-01-12
|
16KB
|
729 lines
/*
** AIFF DataType
**
** Written by Olaf `Olsen' Barthel <olsen@sourcery.han.de>
** Public domain
**
** :ts=4
*/
#include "Data.h"
// Maximum supported replay rate, as per the "Amiga Hardware Reference Manual"
#define MAX_SAMPLE_RATE 28867
// The minimum sample rate we will allow when scaling a sound down
#define MIN_SAMPLE_RATE 5563
// How many bytes to read in one piece. NOTE: the name is misleading; this
// is the async I/O buffer size in bytes, and ConvertAIFF() also reuses it
// as an upper bound on the number of sample frames decoded per pass.
#define MIN_FRAME_RATE (2048 * 8)
// 80 bit IEEE Standard 754 floating point number, as stored in the "COMM"
// chunk. Assumes "unsigned long" is 32 bits wide (true for the Amiga
// compilers this was written for).
typedef struct {
unsigned short exponent; // Exponent, bit #15 is sign bit for mantissa
unsigned long mantissa[2]; // 64 bit mantissa, split into two 32 bit words
} extended;
// Audio Interchange Format chunk data
#define ID_AIFF MAKE_ID('A','I','F','F')
#define ID_AIFC MAKE_ID('A','I','F','C')
#define ID_FVER MAKE_ID('F','V','E','R')
#define ID_COMM MAKE_ID('C','O','M','M')
#define ID_SSND MAKE_ID('S','S','N','D')
// "COMM" chunk header
typedef struct {
short numChannels; // Number of channels
unsigned long numSampleFrames; // Number of sample frames
short sampleSize; // Number of bits per sample point
extended sampleRate; // Replay rate in samples per second
} CommonChunk;
// The same for "AIFC" type files; the on-disk chunk is longer, but we
// don't need the name of the compression format that follows these fields
typedef struct {
short numChannels; // Number of channels
unsigned long numSampleFrames; // Number of sample frames
short sampleSize; // Number of bits per sample point
extended sampleRate; // Replay rate in samples per second
unsigned long compressionType; // Compression type
} ExtCommonChunk;
#define NO_COMPRESSION MAKE_ID('N','O','N','E') // No sound compression
// "SSND" chunk header
typedef struct {
unsigned long offset, // Offset to sound data, for block alignment
blockSize; // Size of block data is aligned to
} SampledSoundHeader;
// "FVER" chunk header
typedef struct {
long timestamp; // Format version creation date
} FormatVersionHeader;
#define AIFCVersion1 0xA2805140 // "AIFC" file format version #1
/* In StackCall.asm: invokes Function(args...) on a freshly-allocated stack
 * of StackSize bytes; used to guard against shallow caller stacks. */
LONG __stdargs StackCall(LONG *Success,LONG StackSize,LONG ArgCount,LONG (* __stdargs Function)(...),...);
/* extended2long(const extended *ex):
 *
 * Convert an 80 bit IEEE Standard 754 floating point number
 * into an integer value, saturating at 0x7FFFFFFF on overflow.
 */
STATIC long __regargs
extended2long(const extended *ex)
{
	// We only need 32 bits of precision; the low mantissa word is ignored
	unsigned long mantissa = ex -> mantissa[0];
	long exponent = ex -> exponent,
	     sign;

	// Bit #15 of the exponent word holds the sign of the mantissa
	if(exponent & 0x8000)
		sign = -1;
	else
		sign = 1;

	// Unbias the exponent
	exponent = (exponent & 0x7FFF) - 0x3FFF;

	// A negative unbiased exponent means the magnitude is below 1;
	// round the result down to zero
	if(exponent < 0)
		mantissa = 0;
	else
	{
		// The mantissa word carries 32 significant bits; an unbiased
		// exponent of 31 leaves it entirely unshifted
		exponent -= 31;

		// Overflow?
		if(exponent > 0)
			mantissa = 0x7FFFFFFF;
		else
			mantissa >>= -exponent;	// Let the point float...

		// Also saturate values in 0x80000000..0xFFFFFFFF; previously
		// these wrapped negative when cast to "long" below (only
		// possible for the exponent == 31 case, which legitimate
		// sample rates never produce, but saturate to be safe)
		if(mantissa > 0x7FFFFFFF)
			mantissa = 0x7FFFFFFF;
	}

	// That's all...
	return(sign * (long)mantissa);
}
/* CloseIFFStream(struct IFFHandle *Handle,struct ClassBase *ClassBase):
 *
 * Close an IFFHandle and clean up the associated data. The teardown
 * order is significant: the IFF parsing context is shut down first,
 * then the underlying asynchronous stream is closed, and only then
 * is the handle itself released. Mirrors OpenIFFStream() below.
 */
STATIC VOID __regargs
CloseIFFStream(struct IFFHandle *Handle,struct ClassBase *ClassBase)
{
// Terminate the IFF context opened by OpenIFF()
CloseIFF(Handle);
// Close the async file that OpenIFFStream() stored in iff_Stream
CloseAsync((AsyncFile *)Handle -> iff_Stream);
// Release the handle allocated by AllocIFF()
FreeIFF(Handle);
}
/* OpenIFFStream(STRPTR Name,LONG *Error,struct ClassBase *ClassBase):
 *
 * Open an IFF file for reading. Returns an initialized IFFHandle on
 * success, or NULL with (*Error) describing what went wrong (note that
 * a failed OpenAsync() leaves *Error untouched at zero, as before).
 */
STATIC struct IFFHandle * __regargs
OpenIFFStream(STRPTR Name,LONG *Error,struct ClassBase *ClassBase)
{
	struct IFFHandle *Stream;

	*Error = 0;

	// Without an IFFHandle nothing else can happen
	Stream = AllocIFF();
	if(Stream == NULL)
	{
		*Error = ERROR_NO_FREE_STORE;
		return(NULL);
	}

	// Attach a buffered asynchronous file to the handle
	Stream -> iff_Stream = (ULONG)OpenAsync(Name,MODE_READ,MIN_FRAME_RATE,SysBase,DOSBase);
	if(Stream -> iff_Stream)
	{
		// Route all stream I/O through the custom async hook
		InitIFF(Stream,IFFF_FSEEK | IFFF_RSEEK,&AsyncHook);

		*Error = OpenIFF(Stream,IFFF_READ);
		if(*Error == 0)
			return(Stream);

		// OpenIFF() failed; drop the stream again
		CloseAsync((AsyncFile *)Stream -> iff_Stream);
	}

	FreeIFF(Stream);

	return(NULL);
}
/* Flatten():
 *
 * Flatten the "AIFF"/"AIFC" sound data, i.e. blend all the channels
 * into a single monophonic chunk and crop the data to eight bits per
 * sample.
 *
 * Src           - raw big-endian sample frames as read from the file
 * Dst           - receives one signed 8 bit sample per retained frame
 * BytesPerPoint - bytes per sample point (1..4)
 * NumChannels   - interleaved channels per frame
 * NumFrames     - number of frames available in Src
 * Skip          - keep only every Skip-th frame (downsampling factor)
 */
STATIC VOID __regargs
Flatten(const UBYTE *Src,BYTE *Dst,const LONG BytesPerPoint,const LONG NumChannels,LONG NumFrames,const LONG Skip)
{
	LONG Sum,Value,SkipCount,i;

	// Make sure that the first sample is converted
	SkipCount = 1;

	// Convert all the frames if possible
	while(NumFrames-- > 0)
	{
		// Add up the channel data
		for(i = Sum = 0 ; i < NumChannels ; i++)
		{
			// The audio data is always left adjusted, which makes it
			// rather easy to normalize every point to a 32 bit value
			switch(BytesPerPoint)
			{
				case 1:
					Value = (ULONG)Src[0] << 24;
					break;

				case 2:
					Value = ((ULONG)Src[0] << 24) | ((ULONG)Src[1] << 16);
					break;

				case 3:
					Value = ((ULONG)Src[0] << 24) | ((ULONG)Src[1] << 16) | ((ULONG)Src[2] << 8);
					break;

				case 4:
					Value = ((ULONG)Src[0] << 24) | ((ULONG)Src[1] << 16) | ((ULONG)Src[2] << 8) | ((ULONG)Src[3]);
					break;

				default:
					// Callers only ever pass 1..4; without this default
					// an invalid value would leave "Value" uninitialized
					Value = 0;
					break;
			}

			// Skip to the next sample point
			Src += BytesPerPoint;

			// Accumulate the sample scaled down to 24 bits, so that
			// several channels can be summed without overflowing
			// (relies on arithmetic right shift of signed values)
			Sum += (Value >> 8);
		}

		// Store this sample value?
		if(--SkipCount < 1)
		{
			// Average the channels and crop the result to eight bits
			Sum = (Sum / NumChannels) >> 16;

			// Make sure that the value is in range
			if(Sum < -128)
				Sum = -128;
			else
			{
				if(Sum > 127)
					Sum = 127;
			}

			// Keep this sample
			*Dst++ = (BYTE)Sum;

			// Skip the next samples if necessary
			SkipCount = Skip;
		}
	}
}
/* ConvertAIFF():
 *
 * Convert "AIFF"/"AIFC" audio data into plain 8 bit audio data.
 * On success (*DstPtr) points to the decoded sample buffer (allocated
 * with MemFlags, owned by the caller) and VoiceHeader is filled in;
 * on failure (*Error) holds a DOS/IFF/datatypes error code.
 */
STATIC BOOL
ConvertAIFF(struct IFFHandle *IFFHandle,BYTE **DstPtr,struct VoiceHeader *VoiceHeader,const ULONG MemFlags,LONG *Error,struct ClassBase *ClassBase)
{
	STATIC LONG Stops[] =
	{
		ID_AIFF,ID_COMM,	// AIFF chunks
		ID_AIFF,ID_SSND,

		ID_AIFC,ID_FVER,	// AIFC chunks
		ID_AIFC,ID_COMM,
		ID_AIFC,ID_SSND
	};

	ExtCommonChunk Common;
	LONG SrcBufferSize,
	     SrcFrames,
	     DstFrames,
	     DstRate,
	     DstSkip,
	     Size;
	FormatVersionHeader FormatHeader;
	SampledSoundHeader SampleHeader;
	UBYTE *SrcBuffer = NULL;
	BYTE *DstBuffer = NULL;
	BOOL Result = FALSE;

	// There are only two/three mandatory chunks to look for
	if(!(*Error = StopChunks(IFFHandle,Stops,5)))
	{
		struct ContextNode *Chunk;
		LONG BytesPerPoint;

		while(!Result && !(*Error) && !ParseIFF(IFFHandle,IFFPARSE_SCAN))
		{
			Chunk = CurrentChunk(IFFHandle);

			switch(Chunk -> cn_ID)
			{
				// This is the file format version ID
				case ID_FVER:

					if(ReadChunkBytes(IFFHandle,&FormatHeader,sizeof(FormatVersionHeader)) != sizeof(FormatVersionHeader))
					{
						*Error = IFFERR_READ;
						break;
					}

					// Does this reader support this format?
					if(FormatHeader . timestamp != AIFCVersion1)
					{
						*Error = ERROR_NOT_IMPLEMENTED;
						break;
					}

					break;

				// This chunk is common for all "AIFF" variants
				case ID_COMM:

					// Determine how many bytes to read; the plain "AIFF"
					// header lacks the trailing compressionType field
					if(Chunk -> cn_Type == ID_AIFF)
						Size = sizeof(CommonChunk);
					else
						Size = sizeof(ExtCommonChunk);

					if(ReadChunkBytes(IFFHandle,&Common,Size) != Size)
					{
						*Error = IFFERR_READ;
						break;
					}

					// Is this a compressed "AIFC" file?
					if(Chunk -> cn_Type == ID_AIFC && Common . compressionType != NO_COMPRESSION)
					{
						*Error = DTERROR_UNKNOWN_COMPRESSION;
						break;
					}

					// Keep the basic data
					DstFrames = Common . numSampleFrames;
					DstRate = extended2long(&Common . sampleRate);
					DstSkip = 1;

					// The current Amiga audio hardware has a fixed
					// replay speed limit. We will compensate for it
					// by cropping the audio data if necessary.
					while(DstRate > MAX_SAMPLE_RATE)
					{
						DstFrames = (DstFrames + 1) / 2;
						DstRate /= 2;
						DstSkip *= 2;
					}

					// See how many bytes make up one sample point
					if(Common . sampleSize <= 8)
						BytesPerPoint = 1;
					else
					{
						if(Common . sampleSize <= 16)
							BytesPerPoint = 2;
						else
						{
							if(Common . sampleSize <= 24)
								BytesPerPoint = 3;
							else
								BytesPerPoint = 4;
						}
					}

					// Just to be sure we accept multiple "COMM" chunks
					if(SrcBuffer)
						FreeVec(SrcBuffer);

					if(DstBuffer)
					{
						FreeVec(DstBuffer);
						DstBuffer = NULL;
					}

					// Don't waste too much memory for loading;
					// MIN_FRAME_RATE doubles as the per-pass frame cap
					if(Common . numSampleFrames < MIN_FRAME_RATE)
						SrcFrames = Common . numSampleFrames;
					else
						SrcFrames = MIN_FRAME_RATE;

					// Allocate the decoding buffer
					if(!(SrcBuffer = AllocVec(SrcBufferSize = BytesPerPoint * Common . numChannels * SrcFrames,MEMF_ANY)))
					{
						*Error = ERROR_NO_FREE_STORE;
						break;
					}

					// Step down in size in order to make the allocation fit
					while(DstRate > MIN_SAMPLE_RATE && DstFrames > 0 && !(DstBuffer = AllocVec(DstFrames,MemFlags | MEMF_CLEAR)))
					{
						DstFrames = (DstFrames + 1) / 2;
						DstRate /= 2;
						DstSkip *= 2;
					}

					// Any success?
					if(!DstBuffer)
						*Error = ERROR_NO_FREE_STORE;

					break;

				// Here follows the sampled sound; this chunk is somewhat
				// equivalent to the "8SVX"/"BODY" chunk.
				// NOTE(review): this code assumes a "COMM" chunk was seen
				// first (SrcBuffer, SrcBufferSize, BytesPerPoint, Common
				// are set there); a malformed file with "SSND" first
				// would reach this with those uninitialized — confirm
				// whether the stop-chunk ordering guarantees this.
				case ID_SSND:

					// Read the data header
					if(ReadChunkBytes(IFFHandle,&SampleHeader,sizeof(SampledSoundHeader)) != sizeof(SampledSoundHeader))
						*Error = IFFERR_READ;
					else
					{
						// Is the data block aligned?
						if(SampleHeader . offset)
						{
							LONG Needed = SampleHeader . offset,Skip;

							// Skip the padding data
							while(!(*Error) && Needed > 0)
							{
								if(SrcBufferSize > Needed)
									Skip = Needed;
								else
									Skip = SrcBufferSize;

								if(ReadChunkBytes(IFFHandle,SrcBuffer,Skip) == Skip)
									Needed -= Skip;
								else
									*Error = IFFERR_READ;
							}
						}

						if(!(*Error))
						{
							LONG TotalFrames = Common . numSampleFrames,
							     FrameSize = BytesPerPoint * Common . numChannels,
							     Frames;

							// Initialize the voice header
							memset(VoiceHeader,0,sizeof(struct VoiceHeader));

							VoiceHeader -> vh_OneShotHiSamples = DstFrames;
							VoiceHeader -> vh_SamplesPerSec = DstRate;
							VoiceHeader -> vh_Octaves = 1;
							VoiceHeader -> vh_Compression = CMP_NONE;
							VoiceHeader -> vh_Volume = 64;

							*DstPtr = DstBuffer;

							if(!(*Error))
							{
								BYTE *Dst = DstBuffer,Smallest,Largest;
								ULONG i;

								// Read the audio data frame by frame
								while(!(*Error) && TotalFrames > 0)
								{
									if(SrcFrames > TotalFrames)
										Frames = TotalFrames;
									else
										Frames = SrcFrames;

									if(ReadChunkRecords(IFFHandle,SrcBuffer,FrameSize,Frames) == Frames)
									{
										Flatten(SrcBuffer,Dst,BytesPerPoint,Common . numChannels,Frames,DstSkip);

										Dst += Frames / DstSkip;
										TotalFrames -= Frames;
									}
									else
										*Error = IFFERR_READ;
								}

								// Look for the smallest and the largest
								// sample value
								Smallest = 127;
								Largest = -128;

								for(i = 0 ; i < DstFrames ; i++)
								{
									if(DstBuffer[i] < Smallest)
										Smallest = DstBuffer[i];

									if(DstBuffer[i] > Largest)
										Largest = DstBuffer[i];
								}

								// Does it use the full range?
								if(Smallest > -128 || Largest < 127)
								{
									BYTE Table[256],*Index;
									WORD j;

									// Point it into the middle; valid
									// indices through Index are -128..127
									Index = &Table[128];

									// Scale the negative values
									// to use the full dynamic
									// amplitude range
									for(j = Smallest ; j < 0 ; j++)
										Index[j] = (-128 * j) / Smallest;

									// Cut off anything below the
									// smallest value
									for(j = -128 ; j < Smallest ; j++)
										Index[j] = -128;

									Index[0] = 0;

									// Scale the positive values
									// to use the full dynamic
									// amplitude range
									for(j = 1 ; j <= Largest ; j++)
										Index[j] = (127 * j) / Largest;

									// Cut off anything above the
									// largest value; the bound must be
									// 128 — Index[j] is Table[128 + j],
									// so the previous "j < 256" bound
									// wrote 128 bytes past the table
									for(j = Largest + 1 ; j < 128 ; j++)
										Index[j] = 127;

									// Make the data use the full range
									for(i = 0 ; i < DstFrames ; i++)
										DstBuffer[i] = Index[DstBuffer[i]];
								}

								Result = TRUE;
							}
						}
					}

					break;
			}
		}
	}

	// Clean up...
	if(SrcBuffer)
		FreeVec(SrcBuffer);

	// The destination buffer is kept only on full success; otherwise
	// the caller must never see a half-decoded sample
	if(DstBuffer && (*Error || !Result))
		FreeVec(DstBuffer);

	if(*Error)
		Result = FALSE;

	return(Result);
}
/* GetAIFF(Object *object,struct TagItem *Tags,struct ClassBase *ClassBase):
 *
 * Create a datatypes object from an "AIFF"/"AIFC" file suitable for
 * MultiView, etc. to display or replay.
 */
STATIC BOOL __regargs
GetAIFF(Object *object,struct TagItem *Tags,struct ClassBase *ClassBase)
{
	struct VoiceHeader *VoiceHeader = NULL;
	struct IFFHandle *Stream;
	BPTR File = NULL;
	STRPTR Title = (STRPTR)GetTagData(DTA_Name,NULL,Tags);
	LONG Error = 0;
	BOOL Result = FALSE;

	// Get the basic data
	GetDTAttrs(object,
		SDTA_VoiceHeader,	&VoiceHeader,
		DTA_Handle,			&File,
	TAG_DONE);

	// Bail out early unless we have everything we need
	if(File == NULL || VoiceHeader == NULL || Title == NULL)
	{
		SetIoErr(ERROR_OBJECT_NOT_FOUND);

		return(FALSE);
	}

	// Open the IFF file for reading
	if(Stream = OpenIFFStream(Title,&Error,ClassBase))
	{
		BYTE *SampleData;
		ULONG MemoryFlags;

		// sound.datatype v40 no longer requires the entire
		// sample to reside in chip memory
		MemoryFlags = (SuperClassBase -> lib_Version > 39) ? MEMF_ANY : MEMF_CHIP;

		// Convert the audio file
		if(ConvertAIFF(Stream,&SampleData,VoiceHeader,MemoryFlags,&Error,ClassBase))
		{
			// Fill in the remaining information
			SetDTAttrs(object,NULL,NULL,
				DTA_ObjName,		Title,
				SDTA_Sample,		SampleData,
				SDTA_SampleLength,	VoiceHeader -> vh_OneShotHiSamples,
				SDTA_Period,		(ULONG)(SysBase -> ex_EClockFrequency * 5) / (ULONG)VoiceHeader -> vh_SamplesPerSec,
				SDTA_Volume,		64,
				SDTA_Cycles,		1,
			TAG_DONE);

			Result = TRUE;
		}

		// Clean up
		CloseIFFStream(Stream,ClassBase);
	}

	if(Error)
		SetIoErr(Error);

	return(Result);
}
/* RealClassDispatch():
 *
 * The class dispatcher routine. Every message is first passed on to
 * the superclass; for OM_NEW the freshly created object is then
 * populated from the AIFF file, and disposed of again on failure.
 */
STATIC Object * __stdargs
RealClassDispatch(Class *class,Object *object,Msg msg)
{
	struct ClassBase *ClassBase = (struct ClassBase *)class -> cl_UserData;
	Object *Result;

	// The superclass handles the bulk of every method
	Result = (Object *)DoSuperMethodA(class,object,msg);

	// A new instance additionally gets the sound data attached
	if(msg -> MethodID == OM_NEW && Result != NULL)
	{
		if(!GetAIFF(Result,((struct opSet *)msg) -> ops_AttrList,ClassBase))
		{
			// Loading failed; dispose of the half-built object
			CoerceMethod(class,Result,OM_DISPOSE);

			Result = NULL;
		}
	}

	return(Result);
}
/* ClassDispatch():
 *
 * The frontend to the real class dispatcher routine. It forwards the
 * three register arguments through StackCall() so that
 * RealClassDispatch() always runs on a private 8192 byte stack,
 * independent of the caller's (possibly shallow) stack.
 */
Object * __saveds __asm
ClassDispatch(register __a0 Class *class,register __a2 Object *object,register __a1 Msg msg)
{
LONG Success;
// "Success" receives whether the stack swap worked; the dispatch
// result itself is the return value of StackCall()
return((Object *)StackCall(&Success,8192,3,(LONG (* __stdargs)(...))RealClassDispatch,class,object,msg));
}